Ubuntu 16.04

Configure RAID 1
2016/07/07
 
Configure RAID 1 using two new hard drives added to a server.
[1] This example is based on the environment below. Two new hard drives, sdb and sdc, have been added to it and will be configured as a RAID 1 array.
root@dlp:~#
df -h

Filesystem                   Size  Used Avail Use% Mounted on
udev                         2.0G     0  2.0G   0% /dev
tmpfs                        396M  5.6M  390M   2% /run
/dev/mapper/ubuntu--vg-root   25G  2.5G   22G  11% /
tmpfs                        2.0G     0  2.0G   0% /dev/shm
tmpfs                        5.0M     0  5.0M   0% /run/lock
tmpfs                        2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/sda1                    472M  193M  255M  44% /boot
tmpfs                        396M     0  396M   0% /run/user/0
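As an optional check before partitioning, list the block devices to confirm the kernel sees the new disks (this verification step is an addition to the original procedure; output varies by environment):
root@dlp:~#
lsblk /dev/sdb /dev/sdc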
[2] Create a partition on each new HDD and set the RAID flag.
root@dlp:~#
parted --script /dev/sdb "mklabel gpt"

root@dlp:~#
parted --script /dev/sdc "mklabel gpt"

root@dlp:~#
parted --script /dev/sdb "mkpart primary 0% 100%"

root@dlp:~#
parted --script /dev/sdc "mkpart primary 0% 100%"

root@dlp:~#
parted --script /dev/sdb "set 1 raid on"

root@dlp:~#
parted --script /dev/sdc "set 1 raid on"
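Optionally, print each partition table to verify that the raid flag was set (an extra check, not part of the original steps):
root@dlp:~#
parted --script /dev/sdb "print"
root@dlp:~#
parted --script /dev/sdc "print"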
[3] Configure RAID 1.
# install tools

root@dlp:~#
apt-get -y install mdadm
root@dlp:~#
mdadm --create /dev/md0 --level=raid1 --raid-devices=2 /dev/sdb1 /dev/sdc1

mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.

# show status

root@dlp:~#
cat /proc/mdstat

Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md0 : active raid1 sdc1[1] sdb1[0]
      20953088 blocks super 1.2 [2/2] [UU]
      [=>...................]  resync =  6.2% (1309632/20953088) finish=2.7min speed=119057K/sec

unused devices: <none>

# once syncing has finished, the status looks like this

root@dlp:~#
cat /proc/mdstat

Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md0 : active raid1 sdc1[1] sdb1[0]
      20953088 blocks super 1.2 [2/2] [UU]

unused devices: <none>
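The array itself carries no filesystem yet, and its definition is not yet recorded for automatic assembly at boot. The following is a minimal sketch covering both, assuming an ext4 filesystem and a mount point of /mnt (both arbitrary choices for this example):

# record the array so it is assembled automatically at boot
root@dlp:~#
mdadm --detail --scan >> /etc/mdadm/mdadm.conf
root@dlp:~#
update-initramfs -u

# create a filesystem on the array and mount it
root@dlp:~#
mkfs.ext4 /dev/md0
root@dlp:~#
mount /dev/md0 /mnt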
[4] If a member HDD in the RAID array fails, rebuild the array as follows.
# on failure, the status looks like this

root@dlp:~#
cat /proc/mdstat

Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md0 : active raid1 sdc1[1] sdb1[0]
      20952960 blocks super 1.2 [2/1] [U_]

unused devices: <none>
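If the failed member still appears in the array, it can be marked as failed and removed before the physical swap (shown for /dev/sdc1 to match the example; adjust to the device that actually failed):

root@dlp:~#
mdadm --manage /dev/md0 --fail /dev/sdc1
root@dlp:~#
mdadm --manage /dev/md0 --remove /dev/sdc1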

# after swapping in a new disk, partition it and set the raid flag as in [2], then re-add it

root@dlp:~#
mdadm --manage /dev/md0 --add /dev/sdc1

mdadm: added /dev/sdc1
root@dlp:~#
cat /proc/mdstat

Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md0 : active raid1 sdc1[1] sdb1[0]
      20952960 blocks super 1.2 [2/2] [UU]
      [=====>...............]  resync = 25.0% (5238272/20952960) finish=2.1min speed=119051K/sec

unused devices: <none>
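After the rebuild finishes, mdadm can also report the detailed array state; output varies by environment:

root@dlp:~#
mdadm --detail /dev/md0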
 